{
  "nodes": [
    {
      "id": "toolAgent_0",
      "position": {
        "x": 1293.1879986131244,
        "y": 191.1741937636126
      },
      "type": "customNode",
      "data": {
        "id": "toolAgent_0",
        "label": "Tool Agent",
        "version": 2,
        "name": "toolAgent",
        "type": "AgentExecutor",
        "baseClasses": [
          "AgentExecutor",
          "BaseChain",
          "Runnable"
        ],
        "category": "Agents",
        "description": "Agent that uses Function Calling to pick the tools and args to call",
        "inputParams": [
          {
            "label": "System Message",
            "name": "systemMessage",
            "type": "string",
            "default": "You are a helpful AI assistant.",
            "description": "If Chat Prompt Template is provided, this will be ignored",
            "rows": 4,
            "optional": true,
            "additionalParams": true,
            "id": "toolAgent_0-input-systemMessage-string"
          },
          {
            "label": "Max Iterations",
            "name": "maxIterations",
            "type": "number",
            "optional": true,
            "additionalParams": true,
            "id": "toolAgent_0-input-maxIterations-number"
          }
        ],
        "inputAnchors": [
          {
            "label": "Tools",
            "name": "tools",
            "type": "Tool",
            "list": true,
            "id": "toolAgent_0-input-tools-Tool"
          },
          {
            "label": "Memory",
            "name": "memory",
            "type": "BaseChatMemory",
            "id": "toolAgent_0-input-memory-BaseChatMemory"
          },
          {
            "label": "Tool Calling Chat Model",
            "name": "model",
            "type": "BaseChatModel",
            "description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat",
            "id": "toolAgent_0-input-model-BaseChatModel"
          },
          {
            "label": "Chat Prompt Template",
            "name": "chatPromptTemplate",
            "type": "ChatPromptTemplate",
            "description": "Override existing prompt with Chat Prompt Template. Human Message must includes {input} variable",
            "optional": true,
            "id": "toolAgent_0-input-chatPromptTemplate-ChatPromptTemplate"
          },
          {
            "label": "Input Moderation",
            "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
            "name": "inputModeration",
            "type": "Moderation",
            "optional": true,
            "list": true,
            "id": "toolAgent_0-input-inputModeration-Moderation"
          }
        ],
        "inputs": {
          "tools": [
            "{{openAPIToolkit_0.data.instance}}"
          ],
          "memory": "{{bufferMemory_0.data.instance}}",
          "model": "{{chatOpenAI_0.data.instance}}",
          "chatPromptTemplate": "",
          "systemMessage": "You are a helpful AI assistant.",
          "inputModeration": "",
          "maxIterations": ""
        },
        "outputAnchors": [
          {
            "id": "toolAgent_0-output-toolAgent-AgentExecutor|BaseChain|Runnable",
            "name": "toolAgent",
            "label": "AgentExecutor",
            "description": "Agent that uses Function Calling to pick the tools and args to call",
            "type": "AgentExecutor | BaseChain | Runnable"
          }
        ],
        "outputs": {},
        "selected": false
      },
      "width": 300,
      "height": 484,
      "selected": false,
      "positionAbsolute": {
        "x": 1293.1879986131244,
        "y": 191.1741937636126
      },
      "dragging": false
    },
    {
      "id": "bufferMemory_0",
      "position": {
        "x": 828.6997922809826,
        "y": 483.14472669268787
      },
      "type": "customNode",
      "data": {
        "id": "bufferMemory_0",
        "label": "Buffer Memory",
        "version": 2,
        "name": "bufferMemory",
        "type": "BufferMemory",
        "baseClasses": [
          "BufferMemory",
          "BaseChatMemory",
          "BaseMemory"
        ],
        "category": "Memory",
        "description": "Retrieve chat messages stored in database",
        "inputParams": [
          {
            "label": "Session Id",
            "name": "sessionId",
            "type": "string",
            "description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.ciniter.com/memory#ui-and-embedded-chat\">more</a>",
            "default": "",
            "additionalParams": true,
            "optional": true,
            "id": "bufferMemory_0-input-sessionId-string"
          },
          {
            "label": "Memory Key",
            "name": "memoryKey",
            "type": "string",
            "default": "chat_history",
            "additionalParams": true,
            "id": "bufferMemory_0-input-memoryKey-string"
          }
        ],
        "inputAnchors": [],
        "inputs": {
          "sessionId": "",
          "memoryKey": "chat_history"
        },
        "outputAnchors": [
          {
            "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
            "name": "bufferMemory",
            "label": "BufferMemory",
            "description": "Retrieve chat messages stored in database",
            "type": "BufferMemory | BaseChatMemory | BaseMemory"
          }
        ],
        "outputs": {},
        "selected": false
      },
      "width": 300,
      "height": 251,
      "selected": false,
      "positionAbsolute": {
        "x": 828.6997922809826,
        "y": 483.14472669268787
      },
      "dragging": false
    },
    {
      "id": "chatOpenAI_0",
      "position": {
        "x": 427.0142725311101,
        "y": -43.8666581648755
      },
      "type": "customNode",
      "data": {
        "id": "chatOpenAI_0",
        "label": "ChatOpenAI",
        "version": 7,
        "name": "chatOpenAI",
        "type": "ChatOpenAI",
        "baseClasses": [
          "ChatOpenAI",
          "BaseChatModel",
          "BaseLanguageModel",
          "Runnable"
        ],
        "category": "Chat Models",
        "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
        "inputParams": [
          {
            "label": "Connect Credential",
            "name": "credential",
            "type": "credential",
            "credentialNames": [
              "openAIApi"
            ],
            "id": "chatOpenAI_0-input-credential-credential"
          },
          {
            "label": "Model Name",
            "name": "modelName",
            "type": "asyncOptions",
            "loadMethod": "listModels",
            "default": "gpt-3.5-turbo",
            "id": "chatOpenAI_0-input-modelName-asyncOptions"
          },
          {
            "label": "Temperature",
            "name": "temperature",
            "type": "number",
            "step": 0.1,
            "default": 0.9,
            "optional": true,
            "id": "chatOpenAI_0-input-temperature-number"
          },
          {
            "label": "Max Tokens",
            "name": "maxTokens",
            "type": "number",
            "step": 1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-maxTokens-number"
          },
          {
            "label": "Top Probability",
            "name": "topP",
            "type": "number",
            "step": 0.1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-topP-number"
          },
          {
            "label": "Frequency Penalty",
            "name": "frequencyPenalty",
            "type": "number",
            "step": 0.1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-frequencyPenalty-number"
          },
          {
            "label": "Presence Penalty",
            "name": "presencePenalty",
            "type": "number",
            "step": 0.1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-presencePenalty-number"
          },
          {
            "label": "Timeout",
            "name": "timeout",
            "type": "number",
            "step": 1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-timeout-number"
          },
          {
            "label": "BasePath",
            "name": "basepath",
            "type": "string",
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-basepath-string"
          },
          {
            "label": "Proxy Url",
            "name": "proxyUrl",
            "type": "string",
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-proxyUrl-string"
          },
          {
            "label": "BaseOptions",
            "name": "baseOptions",
            "type": "json",
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-baseOptions-json"
          },
          {
            "label": "Allow Image Uploads",
            "name": "allowImageUploads",
            "type": "boolean",
            "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, Conversational Agent, Tool Agent",
            "default": false,
            "optional": true,
            "id": "chatOpenAI_0-input-allowImageUploads-boolean"
          },
          {
            "label": "Image Resolution",
            "description": "This parameter controls the resolution in which the model views the image.",
            "name": "imageResolution",
            "type": "options",
            "options": [
              {
                "label": "Low",
                "name": "low"
              },
              {
                "label": "High",
                "name": "high"
              },
              {
                "label": "Auto",
                "name": "auto"
              }
            ],
            "default": "low",
            "optional": false,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-imageResolution-options"
          }
        ],
        "inputAnchors": [
          {
            "label": "Cache",
            "name": "cache",
            "type": "BaseCache",
            "optional": true,
            "id": "chatOpenAI_0-input-cache-BaseCache"
          }
        ],
        "inputs": {
          "cache": "",
          "modelName": "gpt-3.5-turbo",
          "temperature": "0.9",
          "maxTokens": "",
          "topP": "",
          "frequencyPenalty": "",
          "presencePenalty": "",
          "timeout": "",
          "basepath": "",
          "proxyUrl": "",
          "baseOptions": "",
          "allowImageUploads": "",
          "imageResolution": "low"
        },
        "outputAnchors": [
          {
            "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
            "name": "chatOpenAI",
            "label": "ChatOpenAI",
            "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
            "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
          }
        ],
        "outputs": {},
        "selected": false
      },
      "width": 300,
      "height": 669,
      "selected": false,
      "positionAbsolute": {
        "x": 427.0142725311101,
        "y": -43.8666581648755
      },
      "dragging": false
    },
    {
      "id": "openAPIToolkit_0",
      "position": {
        "x": 831.9599233268808,
        "y": -32.87251462719371
      },
      "type": "customNode",
      "data": {
        "id": "openAPIToolkit_0",
        "label": "OpenAPI Toolkit",
        "version": 2,
        "name": "openAPIToolkit",
        "type": "OpenAPIToolkit",
        "baseClasses": [
          "OpenAPIToolkit",
          "Tool"
        ],
        "category": "Tools",
        "description": "Load OpenAPI specification, and converts each API endpoint to a tool",
        "inputParams": [
          {
            "label": "YAML File",
            "name": "yamlFile",
            "type": "file",
            "fileType": ".yaml",
            "id": "openAPIToolkit_0-input-yamlFile-file"
          },
          {
            "label": "Return Direct",
            "name": "returnDirect",
            "description": "Return the output of the tool directly to the user",
            "type": "boolean",
            "optional": true,
            "id": "openAPIToolkit_0-input-returnDirect-boolean"
          },
          {
            "label": "Headers",
            "name": "headers",
            "type": "json",
            "description": "Request headers to be sent with the API request. For example, {\"Authorization\": \"Bearer token\"}",
            "additionalParams": true,
            "optional": true,
            "id": "openAPIToolkit_0-input-headers-json"
          },
          {
            "label": "Custom Code",
            "name": "customCode",
            "type": "code",
            "hint": {
              "label": "How to use",
              "value": "- **Libraries:**  \n  You can use any libraries imported in CiniterFlow.\n\n- **Tool Input Arguments:**  \n  Tool input arguments are available as the following variables:\n  - `$PathParameters`\n  - `$QueryParameters`\n  - `$RequestBody`\n\n- **HTTP Requests:**  \n  By default, you can get the following values for making HTTP requests:\n  - `$url`\n  - `$options`\n\n- **Default Flow Config:**  \n  You can access the default flow configuration using these variables:\n  - `$flow.sessionId`\n  - `$flow.chatId`\n  - `$flow.chatflowId`\n  - `$flow.input`\n  - `$flow.state`\n\n- **Custom Variables:**  \n  You can get custom variables using the syntax:\n  - `$vars.<variable-name>`\n\n- **Return Value:**  \n  The function must return a **string** value at the end.\n\n```js\nconst fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n\n```\n"
            },
            "codeExample": "const fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n",
            "description": "Custom code to return the output of the tool. The code should be a function that takes in the input and returns a string",
            "hideCodeExecute": true,
            "default": "const fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n",
            "additionalParams": true,
            "id": "openAPIToolkit_0-input-customCode-code"
          }
        ],
        "inputAnchors": [],
        "inputs": {
          "returnDirect": "",
          "headers": "{\"Authorization\":\"Bearer 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\"}",
          "customCode": "const fetch = require('node-fetch');\nconst url = $url;\nconsole.log('url=', url);\nconst options = $options;\nconsole.log('options=', options);\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n    console.log('resp=', resp);\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n"
        },
        "outputAnchors": [
          {
            "id": "openAPIToolkit_0-output-openAPIToolkit-OpenAPIToolkit|Tool",
            "name": "openAPIToolkit",
            "label": "OpenAPIToolkit",
            "description": "Load OpenAPI specification, and converts each API endpoint to a tool",
            "type": "OpenAPIToolkit | Tool"
          }
        ],
        "outputs": {},
        "selected": false
      },
      "width": 300,
      "height": 457,
      "selected": false,
      "positionAbsolute": {
        "x": 831.9599233268808,
        "y": -32.87251462719371
      },
      "dragging": false
    }
  ],
  "edges": [
    {
      "source": "bufferMemory_0",
      "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
      "target": "toolAgent_0",
      "targetHandle": "toolAgent_0-input-memory-BaseChatMemory",
      "type": "buttonedge",
      "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-toolAgent_0-toolAgent_0-input-memory-BaseChatMemory"
    },
    {
      "source": "chatOpenAI_0",
      "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
      "target": "toolAgent_0",
      "targetHandle": "toolAgent_0-input-model-BaseChatModel",
      "type": "buttonedge",
      "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-toolAgent_0-toolAgent_0-input-model-BaseChatModel"
    },
    {
      "source": "openAPIToolkit_0",
      "sourceHandle": "openAPIToolkit_0-output-openAPIToolkit-OpenAPIToolkit|Tool",
      "target": "toolAgent_0",
      "targetHandle": "toolAgent_0-input-tools-Tool",
      "type": "buttonedge",
      "id": "openAPIToolkit_0-openAPIToolkit_0-output-openAPIToolkit-OpenAPIToolkit|Tool-toolAgent_0-toolAgent_0-input-tools-Tool"
    }
  ]
}